bitkeeper revision 1.825.8.1 (406b3b3aiVrvWmI8XjJ_64Sk92hivw)
author     kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
           Wed, 31 Mar 2004 21:42:18 +0000 (21:42 +0000)
committer  kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
           Wed, 31 Mar 2004 21:42:18 +0000 (21:42 +0000)
Many files:
  Faster upcall masking from Xen to guest OS.
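
In outline, this change replaces the domain-wide pending/mask words (which had to be updated with lock-prefixed bit operations) with a pair of per-VCPU byte flags that only the physical CPU currently hosting the VCPU ever touches, so plain byte moves suffice. A minimal C sketch of the before/after layout, abridged from the hypervisor-if.h hunk below (the _old/_new type names are illustrative; the real type is shared_info_st):

    /* Before: one pending/mask word shared by the whole domain, updated
     * with locked bit operations. */
    typedef struct shared_info_old {
        unsigned long evtchn_upcall_pending;
        unsigned long evtchn_upcall_mask;
        /* ... event-channel bitmaps, time info, etc. ... */
    } shared_info_old_t;

    /* After: per-VCPU byte flags; no LOCK prefix is needed because only
     * the CPU hosting the VCPU reads or writes them. */
    #define MAX_VIRT_CPUS 1                /* no SMP guest support yet */

    typedef struct shared_info_new {
        struct {
            unsigned char evtchn_upcall_pending;
            unsigned char evtchn_upcall_mask;
            unsigned char pad0, pad1;
        } vcpu_data[MAX_VIRT_CPUS];
        /* ... event-channel bitmaps, time info, etc. ... */
    } shared_info_new_t;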

14 files changed:
tools/xc/lib/xc_linux_build.c
tools/xc/lib/xc_netbsd_build.c
xen/arch/i386/entry.S
xen/arch/i386/traps.c
xen/common/domain.c
xen/common/keyhandler.c
xen/common/schedule.c
xen/include/hypervisor-ifs/hypervisor-if.h
xen/include/xen/event.h
xen/include/xen/sched.h
xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c
xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
xenolinux-2.4.25-sparse/include/asm-xen/system.h

diff --git a/tools/xc/lib/xc_linux_build.c b/tools/xc/lib/xc_linux_build.c
index 92fff33a6adcbbd116332cd789843e14d8452925..3acfc2173d4d816cd3ac0fc9e33c59bc6d6c4151 100644
@@ -284,7 +284,9 @@ static int setup_guestos(int xc_handle,
     /* shared_info page starts its life empty. */
     shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
     memset(shared_info, 0, PAGE_SIZE);
-    shared_info->evtchn_upcall_mask = ~0UL; /* mask all upcalls */
+    /* Mask all upcalls... */
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     unmap_pfn(pm_handle, shared_info);
 
     /* Send the page update requests down to the hypervisor. */
diff --git a/tools/xc/lib/xc_netbsd_build.c b/tools/xc/lib/xc_netbsd_build.c
index ae7ebecc6e0608853ba7dbece98a91c05d688b79..a53018297c7bf745ab41478562f2c25ca647b422 100644
@@ -75,7 +75,7 @@ static int setup_guestos(int xc_handle,
     shared_info_t *shared_info;
     unsigned long ksize;
     mmu_t *mmu = NULL;
-    int pm_handle;
+    int pm_handle, i;
 
     memset(builddomain, 0, sizeof(*builddomain));
 
@@ -183,7 +183,9 @@ static int setup_guestos(int xc_handle,
     /* shared_info page starts its life empty. */
     shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
     memset(shared_info, 0, PAGE_SIZE);
-    shared_info->evtchn_upcall_mask = ~0UL; /* mask all upcalls */
+    /* Mask all upcalls... */
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
     unmap_pfn(pm_handle, shared_info);
 
     /* Send the page update requests down to the hypervisor. */
diff --git a/xen/arch/i386/entry.S b/xen/arch/i386/entry.S
index eab11e5ad8af45d8792132f70317de56ed5b94c9..01419917049523230a997a473499b69f287a8f51 100644
@@ -112,8 +112,8 @@ FAILSAFE_SEL    = 32
 FAILSAFE_ADDR   = 36
 
 /* Offsets in shared_info_t */
-UPCALL_PENDING  = 0
-UPCALL_MASK     = 4
+#define UPCALL_PENDING /* 0 */
+#define UPCALL_MASK       1
 
 /* Offsets in guest_trap_bounce */
 GTB_ERROR_CODE   =  0
@@ -368,12 +368,11 @@ test_all_events:
         jnz  process_hyp_events
 /*test_guest_events:*/
         movl SHARED_INFO(%ebx),%eax
-        movl UPCALL_MASK(%eax),%ecx
-        notl %ecx
-        andl UPCALL_PENDING(%eax),%ecx  # ECX = pending & ~mask
-        andl $1,%ecx                    # Is bit 0 pending and not masked?
+        testb $0xFF,UPCALL_MASK(%eax)
+        jnz  restore_all_guest
+        testb $0xFF,UPCALL_PENDING(%eax)
         jz   restore_all_guest
-        lock btsl $0,UPCALL_MASK(%eax)  # Upcalls are masked during delivery
+        movb $1,UPCALL_MASK(%eax)       # Upcalls are masked during delivery
 /*process_guest_events:*/
         movzwl PROCESSOR(%ebx),%edx
         shl  $4,%edx                    # sizeof(guest_trap_bounce) == 16
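
The assembly above is terse, so here is a hedged C rendering of the new return-to-guest check (should_bounce_upcall is an illustrative name; the real logic is the testb/movb sequence in this hunk):

    struct vcpu_info { unsigned char evtchn_upcall_pending, evtchn_upcall_mask; };

    /* Sketch of test_all_events / test_guest_events above. */
    static int should_bounce_upcall(volatile struct vcpu_info *v)
    {
        if ( v->evtchn_upcall_mask )        /* guest has upcalls masked      */
            return 0;                       /*   -> restore_all_guest        */
        if ( !v->evtchn_upcall_pending )    /* nothing pending               */
            return 0;                       /*   -> restore_all_guest        */
        v->evtchn_upcall_mask = 1;          /* mask upcalls during delivery  */
        return 1;                           /*   -> process_guest_events     */
    }

Note that the pending byte is left set for the guest to clear, matching evtchn_do_upcall() later in the patch.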
diff --git a/xen/arch/i386/traps.c b/xen/arch/i386/traps.c
index d10292f618aef51990465635676fd607a3f42f76..d50b101f3acd71b57da107cae4e96187c59da02c 100644
@@ -206,7 +206,7 @@ static inline void do_trap(int trapnr, char *str,
     gtb->cs         = ti->cs;
     gtb->eip        = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
     return; 
 
  fault_in_hypervisor:
@@ -277,9 +277,7 @@ asmlinkage void do_int3(struct pt_regs *regs, long error_code)
     gtb->cs         = ti->cs;
     gtb->eip        = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
-    return;
-
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
 }
 
 asmlinkage void do_double_fault(void)
@@ -353,7 +351,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
     gtb->cs         = ti->cs;
     gtb->eip        = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
     return; 
 
  fault_in_hypervisor:
@@ -452,7 +450,7 @@ asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
     gtb->cs         = ti->cs;
     gtb->eip        = ti->address;
     if ( TI_GET_IF(ti) )
-        set_bit(0, &p->shared_info->evtchn_upcall_mask);
+        p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
     return;
 
  gp_in_kernel:
diff --git a/xen/common/domain.c b/xen/common/domain.c
index f83562a9037f6892fd8de8ebe2b2d00f0a8efd0a..e86a5eba27fbee029bce6ef1d24fa8174087a247 100644
@@ -918,7 +918,9 @@ int construct_dom0(struct task_struct *p,
     /* Set up shared-info area. */
     update_dom_time(p->shared_info);
     p->shared_info->domain_time = 0;
-    p->shared_info->evtchn_upcall_mask = ~0UL; /* mask all upcalls */
+    /* Mask all upcalls... */
+    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+        p->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 
     /* Install the new page tables. */
     __cli();
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 734df5cffa3f4e09042b8273cf2dc39bc5bf68f4..2f6a38417c9d4b67a1593a1d1776e3735da3f4a1 100644
@@ -94,8 +94,9 @@ void do_task_queues(u_char key, void *dev_id, struct pt_regs *regs)
         sched_prn_state(p ->state);
        printk(", hyp_events = %08x\n", p->hyp_events);
         s = p->shared_info; 
-        printk("Guest: upcall_pend = %08lx, upcall_mask = %08lx\n", 
-               s->evtchn_upcall_pending, s->evtchn_upcall_mask);
+        printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
+               s->vcpu_data[0].evtchn_upcall_pending, 
+               s->vcpu_data[0].evtchn_upcall_mask);
         printk("Notifying guest...\n"); 
         send_guest_virq(p, VIRQ_DEBUG);
     }
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 496b35b9a8f25759380ab5c777c61548c575a188..7e8d03dbc08a2d149814c4adf40c0b6649cdb9db 100644
@@ -220,7 +220,7 @@ void wake_up(struct task_struct *p)
 static long do_block(void)
 {
     ASSERT(current->domain != IDLE_DOMAIN_ID);
-    clear_bit(0, &current->shared_info->evtchn_upcall_mask);
+    current->shared_info->vcpu_data[0].evtchn_upcall_mask = 0;
     current->state = TASK_INTERRUPTIBLE;
     TRACE_2D(TRC_SCHED_BLOCK, current->domain, current);
     __enter_scheduler();
diff --git a/xen/include/hypervisor-ifs/hypervisor-if.h b/xen/include/hypervisor-ifs/hypervisor-if.h
index 2335ed5ad20715d2905a0c02e9fb2a345e3ac553..3941b3494459f8a5e0e5e96e43b8156b29ad0ec1 100644
@@ -150,6 +150,9 @@ typedef struct
 /* Event channel endpoints per domain. */
 #define NR_EVENT_CHANNELS 1024
 
+/* No support for multi-processor guests. */
+#define MAX_VIRT_CPUS 1
+
 /*
  * Xen/guestos shared data -- pointer provided in start_info.
  * NB. We expect that this struct is smaller than a page.
@@ -157,13 +160,39 @@ typedef struct
 typedef struct shared_info_st
 {
     /*
-     * If bit 0 in evtchn_upcall_pending is transitioned 0->1, and bit 0 in 
-     * evtchn_upcall_mask is clear, then an asynchronous upcall is scheduled. 
-     * The upcall mask can be used to prevent unbounded reentrancy and stack 
-     * overflow (in this way, acts as a kind of interrupt-enable flag).
+     * Per-VCPU information goes here. This will be cleaned up more when Xen 
+     * actually supports multi-VCPU guests.
      */
-    unsigned long evtchn_upcall_pending;
-    unsigned long evtchn_upcall_mask;
+    struct {
+        /*
+         * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+         * a pending notification for a particular VCPU. It is then cleared 
+         * by the guest OS /before/ checking for pending work, thus avoiding
+         * a set-and-check race. Note that the mask is only accessed by Xen
+         * on the CPU that is currently hosting the VCPU. This means that the
+         * pending and mask flags can be updated by the guest without special
+         * synchronisation (i.e., no need for the x86 LOCK prefix).
+         * This may seem suboptimal because if the pending flag is set by
+         * a different CPU then an IPI may be scheduled even when the mask
+         * is set. However, note:
+         *  1. The task of 'interrupt holdoff' is covered by the per-event-
+         *     channel mask bits. A 'noisy' event that is continually being
+         *     triggered can be masked at source at this very precise
+         *     granularity.
+         *  2. The main purpose of the per-VCPU mask is therefore to restrict
+         *     reentrant execution: whether for concurrency control, or to
+         *     prevent unbounded stack usage. Whatever the purpose, we expect
+         *     that the mask will be asserted only for short periods at a time,
+         *     and so the likelihood of a 'spurious' IPI is suitably small.
+         * The mask is read before making an event upcall to the guest: a
+         * non-zero mask therefore guarantees that the VCPU will not receive
+         * an upcall activation. The mask is cleared when the VCPU requests
+         * to block: this avoids wakeup-waiting races.
+         */
+        u8 evtchn_upcall_pending;
+        u8 evtchn_upcall_mask;
+        u8 pad0, pad1;
+    } vcpu_data[MAX_VIRT_CPUS];
 
     /*
      * A domain can have up to 1024 "event channels" on which it can send
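
The comment above describes the guest's half of the protocol. A hedged C sketch of that consumer loop, simplified to a single 32-bit selector word (do_upcall is an illustrative name; the real loop is evtchn_do_upcall() in the xenolinux hunks later in this patch):

    #include <stdint.h>

    struct vcpu_info { uint8_t evtchn_upcall_pending, evtchn_upcall_mask; };

    static void do_upcall(volatile struct vcpu_info *v, volatile uint32_t *sel)
    {
        /* Xen set evtchn_upcall_mask before bouncing into us; while it stays
         * set it acts as the guest's virtual interrupt-enable flag. */
        while ( v->evtchn_upcall_pending )
        {
            v->evtchn_upcall_pending = 0;     /* clear BEFORE checking for work */
            uint32_t l1 = __sync_lock_test_and_set(sel, 0);    /* atomic xchg   */
            /* ... demultiplex l1 (and the per-port bitmaps) to handlers ... */
            (void)l1;
        }
    }

Clearing the pending byte before scanning means a notification that arrives mid-scan re-enters the while loop rather than being lost.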
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 3dd4cf383edbc897ed6d2e3068b95bf8cf2fafee..542cd3c6ef342e33d076396626e5fe9c9940b619 100644
@@ -18,7 +18,7 @@
  */
 
 /* Schedule an asynchronous callback for the specified domain. */
-static inline void __guest_notify(struct task_struct *p)
+static inline void guest_schedule_to_run(struct task_struct *p)
 {
 #ifdef CONFIG_SMP
     unsigned long flags, cpu_mask;
@@ -41,23 +41,11 @@ static inline void __guest_notify(struct task_struct *p)
 #endif
 }
 
-static inline void guest_notify(struct task_struct *p)
-{
-    /*
-     * Upcall already pending or upcalls masked?
-     * NB. Suitably synchronised on x86:
-     *  We must set the pending bit before checking the mask, but this is
-     *  guaranteed to occur because test_and_set_bit() is an ordering barrier.
-     */
-    if ( !test_and_set_bit(0, &p->shared_info->evtchn_upcall_pending) &&
-         !test_bit(0, &p->shared_info->evtchn_upcall_mask) )
-        __guest_notify(p);
-}
-
-
 /*
  * EVENT-CHANNEL NOTIFICATIONS
- * NB. As in guest_notify, evtchn_set_* is suitably synchronised on x86.
+ * NB. On x86, the atomic bit operations also act as memory barriers. There
+ * is therefore sufficiently strict ordering for this architecture -- others
+ * may require explicit memory barriers.
  */
 
 static inline void evtchn_set_pending(struct task_struct *p, int port)
@@ -66,7 +54,11 @@ static inline void evtchn_set_pending(struct task_struct *p, int port)
     if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
          !test_bit        (port,    &s->evtchn_mask[0])    &&
          !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
-        guest_notify(p);
+    {
+        /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
+        p->shared_info->vcpu_data[0].evtchn_upcall_pending = 1;
+        guest_schedule_to_run(p);
+    }
 }
 
 static inline void evtchn_set_exception(struct task_struct *p, int port)
@@ -103,7 +95,7 @@ static inline void send_guest_pirq(struct task_struct *p, int pirq)
 static inline void send_hyp_event(struct task_struct *p, int event)
 {
     if ( !test_and_set_bit(event, &p->hyp_events) )
-        __guest_notify(p);
+        guest_schedule_to_run(p);
 }
 
 /* Called on return from (architecture-dependent) entry.S. */
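
The ordering called out in evtchn_set_pending() above is the crux: the per-VCPU pending byte is written only after the per-port bit and the selector bit are visible, so a guest that clears the byte and then scans the selectors cannot strand an event. A hedged, self-contained sketch of the producer side for a one-VCPU guest (raise_event and test_and_set32 are illustrative names; kick stands in for guest_schedule_to_run):

    #include <stdint.h>

    struct shared_ev {
        uint32_t evtchn_pending[32], evtchn_mask[32], evtchn_pending_sel;
        struct { uint8_t evtchn_upcall_pending, evtchn_upcall_mask; } vcpu_data[1];
    };

    /* Atomically set bit 'nr' in an array of 32-bit words; return old value. */
    static int test_and_set32(volatile uint32_t *w, int nr)
    {
        uint32_t bit = UINT32_C(1) << (nr & 31);
        return (__sync_fetch_and_or(&w[nr >> 5], bit) & bit) != 0;
    }

    static void raise_event(struct shared_ev *s, int port, void (*kick)(void))
    {
        if ( !test_and_set32(s->evtchn_pending, port) &&           /* 1: port bit  */
             !(s->evtchn_mask[port >> 5] & (UINT32_C(1) << (port & 31))) &&
             !test_and_set32(&s->evtchn_pending_sel, port >> 5) )  /* 2: selector  */
        {
            s->vcpu_data[0].evtchn_upcall_pending = 1;             /* 3: last write */
            kick();                      /* e.g. IPI the CPU hosting the VCPU      */
        }
    }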
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 4f506df04b39aea1cec667e5df5ff7fa9d872840..033f860c01be569659b98551adfde37f158707f9 100644
@@ -297,10 +297,10 @@ static inline long schedule_timeout(long timeout)
     return 0;
 }
 
-#define signal_pending(_p) \
-    (((_p)->hyp_events != 0) ||                                 \
-     (test_bit(0, &(_p)->shared_info->evtchn_upcall_pending) && \
-      !test_bit(0, &(_p)->shared_info->evtchn_upcall_mask)))
+#define signal_pending(_p)                                      \
+    ( (_p)->hyp_events ||                                       \
+      ((_p)->shared_info->vcpu_data[0].evtchn_upcall_pending && \
+       !(_p)->shared_info->vcpu_data[0].evtchn_upcall_mask) )
 
 void domain_init(void);
 
diff --git a/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S b/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
index c744f1bdcb17b5a91bcc6a4824233b2d7ecb2da9..b78c74fd9c15140e38e17e9304e2e64139dc70c2 100644
@@ -80,7 +80,7 @@ processor     = 52
 
 /* Offsets into shared_info_t. */
 #define evtchn_upcall_pending /* 0 */
-#define evtchn_upcall_mask       4
+#define evtchn_upcall_mask       1
 
 ENOSYS = 38
 
@@ -210,14 +210,14 @@ ENTRY(system_call)
        movl %eax,EAX(%esp)             # save the return value
 ENTRY(ret_from_sys_call)
         movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
-        lock btsl $0,evtchn_upcall_mask(%esi) # make tests atomic
+        movb $1,evtchn_upcall_mask(%esi) # make tests atomic
 ret_syscall_tests:
        cmpl $0,need_resched(%ebx)
        jne reschedule
        cmpl $0,sigpending(%ebx)
        je   safesti                    # ensure need_resched updates are seen
 signal_return:
-       lock btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+       movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
        movl %esp,%eax
        xorl %edx,%edx
        call SYMBOL_NAME(do_signal)
@@ -254,9 +254,9 @@ ret_from_exception:
 
        ALIGN
 reschedule:
-        lock btrl $0,evtchn_upcall_mask(%esi)  # reenable event callbacks
-       call SYMBOL_NAME(schedule)             # test
-       jmp ret_from_sys_call
+        movb $0,evtchn_upcall_mask(%esi)  # reenable event callbacks
+       call SYMBOL_NAME(schedule)        # test
+       jmp  ret_from_sys_call
 
 ENTRY(divide_error)
        pushl $0                # no error code
@@ -317,12 +317,12 @@ ENTRY(hypervisor_callback)
         movb CS(%esp),%cl
        test $2,%cl          # slow return to ring 2 or 3
        jne  ret_syscall_tests
-safesti:lock btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+safesti:movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
 scrit:  /**** START OF CRITICAL REGION ****/
-        testb $1,evtchn_upcall_pending(%esi)
+        testb $0xFF,evtchn_upcall_pending(%esi)
         jnz  14f              # process more events if necessary...
         RESTORE_ALL
-14:     lock btsl $0,evtchn_upcall_mask(%esi)
+14:     movb $1,evtchn_upcall_mask(%esi)
         jmp  11b
 ecrit:  /**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
@@ -351,7 +351,7 @@ critical_region_fixup:
         jmp  11b
         
 critical_fixup_table:        
-        .byte 0x00,0x00,0x00                  # testb $1,(%esi)
+        .byte 0x00,0x00,0x00                  # testb $0xFF,(%esi)
         .byte 0x00,0x00                       # jnz  14f
         .byte 0x00                            # pop  %ebx
         .byte 0x04                            # pop  %ecx
@@ -364,7 +364,7 @@ critical_fixup_table:
         .byte 0x20                            # pop  %es
         .byte 0x24,0x24,0x24                  # add  $4,%esp
         .byte 0x28                            # iret
-        .byte 0x00,0x00,0x00,0x00,0x00,0x00   # lock btsl $0,4(%esi)
+        .byte 0x00,0x00,0x00,0x00             # movb $1,4(%esi)
         .byte 0x00,0x00                       # jmp  11b
 
 # Hypervisor uses this for application faults while it executes.
diff --git a/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c b/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c
index c65806a7d9d490dbbbd07e1cd04b53053aebd277..7425f92047f43ecfba452e7eb90fe074c4408c60 100644
@@ -50,8 +50,10 @@ void evtchn_do_upcall(struct pt_regs *regs)
 
     local_irq_save(flags);
     
-    while ( synch_test_and_clear_bit(0, &s->evtchn_upcall_pending) )
+    while ( s->vcpu_data[0].evtchn_upcall_pending )
     {
+        s->vcpu_data[0].evtchn_upcall_pending = 0;
+        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
         l1 = xchg(&s->evtchn_pending_sel, 0);
         while ( (l1i = ffs(l1)) != 0 )
         {
diff --git a/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h b/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
index 8dbb460cdacfc02d24daacb2964ef8228fbbad8c..ececad9447d59586d361e5fced486131e2970ff7 100644
@@ -42,10 +42,12 @@ static inline void unmask_evtchn(int port)
      * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
      */
     if (  synch_test_bit        (port,    &s->evtchn_pending[0]) && 
-         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) &&
-         !synch_test_and_set_bit(0,       &s->evtchn_upcall_pending) &&
-         !synch_test_bit        (0,       &s->evtchn_upcall_mask) )
-        evtchn_do_upcall(NULL);
+         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+    {
+        s->vcpu_data[0].evtchn_upcall_pending = 1;
+        if ( !s->vcpu_data[0].evtchn_upcall_mask )
+            evtchn_do_upcall(NULL);
+    }
 }
 
 static inline void clear_evtchn(int port)
diff --git a/xenolinux-2.4.25-sparse/include/asm-xen/system.h b/xenolinux-2.4.25-sparse/include/asm-xen/system.h
index 77b325d61a8c63e15452413c729e24eb70aafae5..86d6c7b150ae528dcdf4a052dfdb074fc64f3739 100644
@@ -302,42 +302,55 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #define safe_halt()             ((void)0)
 
-/*
- * Note the use of synch_*_bit() operations in the following. These operations
- * ensure correct serialisation of checks and updates w.r.t. Xen executing on
- * a different CPU.
+/* 
+ * The use of 'barrier' in the following reflects the macros' use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
+ * following critical operations are executed, and all critical operations must
+ * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
+ * architecture, for example, also requires such barriers.
  */
 
 #define __cli()                                                               \
 do {                                                                          \
-    synch_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
 } while (0)
 
 #define __sti()                                                               \
 do {                                                                          \
     shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    synch_clear_bit(0, &_shared->evtchn_upcall_mask);                         \
-    if ( unlikely(synch_test_bit(0, &_shared->evtchn_upcall_pending)) )       \
+    barrier();                                                                \
+    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
+    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
         evtchn_do_upcall(NULL);                                               \
 } while (0)
 
 #define __save_flags(x)                                                       \
 do {                                                                          \
-    (x) = synch_test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);     \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
 } while (0)
 
-#define __restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
+#define __restore_flags(x)                                                    \
+do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
+    barrier();                                                                \
+    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = x) == 0 )                \
+        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
+            evtchn_do_upcall(NULL);                                           \
+} while (0)
 
 #define __save_and_cli(x)                                                     \
 do {                                                                          \
-    (x) = synch_test_and_set_bit(                                             \
-        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                      \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
+    barrier();                                                                \
 } while (0)
 
 #define __save_and_sti(x)                                                     \
 do {                                                                          \
-    (x) = synch_test_and_clear_bit(                                           \
-        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                      \
+    barrier();                                                                \
+    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 0;              \
 } while (0)
 
 #define local_irq_save(x)       __save_and_cli(x)
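
Because only the local CPU ever touches its own mask byte, the rewritten macros need only barrier(), the usual Linux compiler barrier, rather than a locked instruction. A hedged usage sketch of a guest-side critical section built from the macros above (update_some_shared_state is an illustrative name):

    void update_some_shared_state(void)
    {
        unsigned long flags;

        __save_and_cli(flags);    /* remember the old mask byte, then set it */
        /* ... work that must not be re-entered by an event upcall ... */
        __restore_flags(flags);   /* put the old mask back; if upcalls are now
                                   * unmasked and one is pending, it is delivered
                                   * immediately via evtchn_do_upcall()          */
    }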